Meteo Imp Analysis
  • Library Docs

On this page

  • Gaps all variables
  • Gap in only 1 variable
  • Loss only gap
  • Smoother

Hainich with ERA-Interim

Manual fine tuning learning process for presentation 18 Jan 2023

%load_ext autoreload
%autoreload 2
from meteo_imp.kalman.fastai import *
from meteo_imp.kalman.filter import *
from meteo_imp.utils import *
from meteo_imp.data import *

from fastai.tabular.learner import *
from fastai.learner import *
from fastai.callback.all import *
from fastcore.foundation import L

from meteo_imp.kalman.fastai import show_results
import pandas as pd
import numpy as np
import torch

from sklearn.decomposition import PCA
reset_seed()
hai = pd.read_parquet(hai_path64)
hai_era = pd.read_parquet(hai_era_path64)
# dls = imp_dataloader(hai64, hai_era64, var_sel = gen_var_sel(['TA', 'SW_IN', 'VPD']), block_len=200, gap_len=10, bs=20, control_lags=[1])
pc = PCA().fit(hai)
d0 = hai.iloc[0:1]
tr0 = pc.transform(d0)
tr0
array([[-121.11917652,   -7.06844313,    0.87975241]])
t0 = np.random.randn(5,1) * 10
tt = np.hstack([t0, 2 * t0 + np.random.randn(5,1), 3 * t0 + np.random.randn(5,1)])
tt = np.vstack([tt, -tt])
tt.mean(0)
array([1.77635684e-16, 0.00000000e+00, 0.00000000e+00])
def pca(X):
    """Principal component analysis of a 0-centered data matrix.

    Parameters
    ----------
    X : (n, m) array
        Data matrix, assumed column-centered (each feature has mean 0).

    Returns
    -------
    X_pca : (n, m) array
        X projected onto the principal components, ordered by
        decreasing explained variance (sklearn convention).
    eigen_vecs : (m, m) array
        Eigenvectors of the covariance matrix as columns, in the same
        descending-variance order.
    """
    n, m = X.shape
    assert np.allclose(X.mean(axis=0), np.zeros(m))
    # Sample covariance matrix (unbiased estimator, hence n-1)
    C = np.dot(X.T, X) / (n - 1)
    # C is symmetric: eigh is numerically stable and guarantees real
    # eigenvalues (np.linalg.eig may return tiny complex parts and an
    # arbitrary eigenvalue order)
    eigen_vals, eigen_vecs = np.linalg.eigh(C)
    # eigh sorts ascending; reverse so the first component carries the
    # largest variance, matching sklearn.decomposition.PCA
    order = np.argsort(eigen_vals)[::-1]
    eigen_vecs = eigen_vecs[:, order]
    # Project X onto PC space
    X_pca = np.dot(X, eigen_vecs)
    return X_pca, eigen_vecs
pca(tt)
(array([[-9.15947487e+00, -3.03773243e-01,  3.81626348e-01],
        [ 2.00045215e+01, -1.99595583e-02,  1.53745860e+00],
        [ 1.33744310e+01,  2.01395206e-02, -3.19819214e-02],
        [-8.99057811e+00,  1.24208069e-01,  9.57988123e-01],
        [ 3.14616875e+01, -4.88141095e-02, -5.79117681e-01],
        [ 9.15947487e+00,  3.03773243e-01, -3.81626348e-01],
        [-2.00045215e+01,  1.99595583e-02, -1.53745860e+00],
        [-1.33744310e+01, -2.01395206e-02,  3.19819214e-02],
        [ 8.99057811e+00, -1.24208069e-01, -9.57988123e-01],
        [-3.14616875e+01,  4.88141095e-02,  5.79117681e-01]]),
 array([[-0.26545847, -0.95659856,  0.12021234],
        [-0.53518386,  0.04249433, -0.84366608],
        [-0.80194142,  0.28829401,  0.52323659]]))
sk_pc = PCA(2).fit(tt)
tt @ sk_pc.components_.T
array([[ -9.15947487,   0.38162635],
       [ 20.00452148,   1.5374586 ],
       [ 13.37443103,  -0.03198192],
       [ -8.99057811,   0.95798812],
       [ 31.46168754,  -0.57911768],
       [  9.15947487,  -0.38162635],
       [-20.00452148,  -1.5374586 ],
       [-13.37443103,   0.03198192],
       [  8.99057811,  -0.95798812],
       [-31.46168754,   0.57911768]])
tr = sk_pc.transform(tt)
tr
array([[ -9.15947487,   0.38162635],
       [ 20.00452148,   1.5374586 ],
       [ 13.37443103,  -0.03198192],
       [ -8.99057811,   0.95798812],
       [ 31.46168754,  -0.57911768],
       [  9.15947487,  -0.38162635],
       [-20.00452148,  -1.5374586 ],
       [-13.37443103,   0.03198192],
       [  8.99057811,  -0.95798812],
       [-31.46168754,   0.57911768]])
sk_pc.components_.T
array([[-0.26545847,  0.12021234],
       [-0.53518386, -0.84366608],
       [-0.80194142,  0.52323659]])
(sk_pc.components_ @ tt.T).T
array([[ -9.15947487,   0.38162635],
       [ 20.00452148,   1.5374586 ],
       [ 13.37443103,  -0.03198192],
       [ -8.99057811,   0.95798812],
       [ 31.46168754,  -0.57911768],
       [  9.15947487,  -0.38162635],
       [-20.00452148,  -1.5374586 ],
       [-13.37443103,   0.03198192],
       [  8.99057811,  -0.95798812],
       [-31.46168754,   0.57911768]])
tt[0, None]
array([[2.76792539, 4.56712931, 7.45746711]])
sk_pc.components_.shape
(3, 3)
tt[0].shape
(3,)
(sk_pc.components_ @ tt[0])
array([-9.15947487,  0.38162635])
tt[0]
array([2.76792539, 4.56712931, 7.45746711])
sk_pc.components_.T @ tr[0]
array([2.47733634, 4.58003795, 7.54504311])
sk_pc.inverse_transform(tr[0])
array([2.47733634, 4.58003795, 7.54504311])
d0.to_numpy()
array([[-0.6  ,  0.   ,  0.222]])
hai.iloc[0]
TA      -0.600
SW_IN    0.000
VPD      0.222
Name: 2000-01-01 00:30:00, dtype: float64

\[ x = y\Lambda \]

pc.components_
array([[ 0.01681572,  0.99979324,  0.01143269],
       [ 0.93010891, -0.01983747,  0.36674772],
       [-0.36689868, -0.00446652,  0.93025018]])
np.linalg.inv(pc.components_)
array([[ 0.01681572,  0.93010891, -0.36689868],
       [ 0.99979324, -0.01983747, -0.00446652],
       [ 0.01143269,  0.36674772,  0.93025018]])

Assuming that the control has the same dimensions as the observations, then if we are doing a local slope model we have \(B \in \mathbb{R}^{state \times contr}\): \[ B = \begin{bmatrix} -I & I \\ 0 & 0 \end{bmatrix}\]

from torch import hstack, eye, vstack, ones, zeros, tensor
from functools import partial
def set_dtype(*fns, dtype=torch.float64):
    """Bind `dtype` as a pre-set keyword argument on each callable in `fns`.

    Returns a list of `functools.partial` wrappers, one per input
    callable, each of which forwards `dtype=dtype` on every call.
    """
    bind = lambda fn: partial(fn, dtype=dtype)
    return list(map(bind, fns))
eye, ones, zeros, tensor = set_dtype(eye, ones, zeros, tensor)
def init_smart(n_dim_obs, n_dim_state, df, pca=True):
    """Build a KalmanFilter initialized as a local-slope state-space model.

    The state is [level, slope] stacked, so the filter has
    ``2 * n_dim_state`` state dimensions. The control is assumed to have
    the same dimensionality as the observations (n_dim_obs == n_dim_contr),
    and acts only on the level block via the difference of the two lagged
    controls: B = [[-C, C], [0, 0]].

    Parameters
    ----------
    n_dim_obs : int
        Number of observed variables (also the control dimensionality).
    n_dim_state : int
        Number of latent level dimensions (state is twice this).
    df : DataFrame
        Training data used to fit the PCA maps when ``pca=True``.
    pca : bool
        If True, use PCA components of ``df`` as the obs<->state maps;
        otherwise use identity-like maps.
    """
    if pca:
        comp = PCA(n_dim_state).fit(df).components_  # (n_dim_state, n_dim_obs)
        obs_matrix = tensor(comp.T)   # transform state -> obs
        contr_matrix = tensor(comp)   # transform obs -> state
    else:
        # Rectangular identities keep the shapes correct even when
        # n_dim_obs != n_dim_state (a square eye(n_dim_obs) would only
        # be valid when the two dimensions coincide).
        obs_matrix = eye(n_dim_obs, n_dim_state)    # (obs, state)
        contr_matrix = eye(n_dim_state, n_dim_obs)  # (state, obs)

    return KalmanFilter(
        # level integrates the slope; slope is a random walk
        trans_matrix =     vstack([hstack([eye(n_dim_state),                eye(n_dim_state)]),
                                   hstack([zeros(n_dim_state, n_dim_state), eye(n_dim_state)])]),
        trans_off =        zeros(n_dim_state * 2),
        trans_cov =        eye(n_dim_state * 2)*.1,
        # observations read the level block only
        obs_matrix =       hstack([obs_matrix, zeros(n_dim_obs, n_dim_state)]),
        obs_off =          zeros(n_dim_obs),
        obs_cov =          eye(n_dim_obs)*.01,
        # control acts on the level as the difference of lagged controls
        contr_matrix =     vstack([hstack([-contr_matrix,                  contr_matrix]),
                                   hstack([ zeros(n_dim_state,n_dim_obs), zeros(n_dim_state, n_dim_obs)])]),
        init_state_mean =  zeros(n_dim_state * 2),
        init_state_cov =   eye(n_dim_state * 2) * 3,
    )
np.hstack([np.eye(2), np.eye(2)])
array([[1., 0., 1., 0.],
       [0., 1., 0., 1.]])
init_smart(3,2, hai)

Kalman Filter (3 obs, 4 state, 6 contr)

trans matrix (A)

state x_0 x_1 x_2 x_3
x_0 1.0000 0.0000 1.0000 0.0000
x_1 0.0000 1.0000 0.0000 1.0000
x_2 0.0000 0.0000 1.0000 0.0000
x_3 0.0000 0.0000 0.0000 1.0000

trans cov (Q)

state x_0 x_1 x_2 x_3
x_0 0.1000 0.0000 0.0000 0.0000
x_1 0.0000 0.1000 0.0000 0.0000
x_2 0.0000 0.0000 0.1000 0.0000
x_3 0.0000 0.0000 0.0000 0.1000

trans off

state offset
x_0 0.0000
x_1 0.0000
x_2 0.0000
x_3 0.0000

obs matrix (H)

variable x_0 x_1 x_2 x_3
y_0 0.0168 0.9301 0.0000 0.0000
y_1 0.9998 -0.0198 0.0000 0.0000
y_2 0.0114 0.3667 0.0000 0.0000

obs cov (R)

variable y_0 y_1 y_2
y_0 0.0100 0.0000 0.0000
y_1 0.0000 0.0100 0.0000
y_2 0.0000 0.0000 0.0100

obs off

variable offset
y_0 0.0000
y_1 0.0000
y_2 0.0000

contr matrix (B)

state c_0 c_1 c_2 c_3 c_4 c_5
x_0 -0.0168 -0.9998 -0.0114 0.0168 0.9998 0.0114
x_1 -0.9301 0.0198 -0.3667 0.9301 -0.0198 0.3667
x_2 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
x_3 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000

init state mean

state mean
x_0 0.0000
x_1 0.0000
x_2 0.0000
x_3 0.0000

init state cov

state x_0 x_1 x_2 x_3
x_0 3.0000 0.0000 0.0000 0.0000
x_1 0.0000 3.0000 0.0000 0.0000
x_2 0.0000 0.0000 3.0000 0.0000
x_3 0.0000 0.0000 0.0000 3.0000
class PersistentRecorder(Callback):
    # Callback intended to keep Recorder history (lrs/iters/losses/values)
    # available across repeated `fit` calls on the same Learner.
    order = 70  # run after the standard Recorder callback
    name = "per_recorder"
    # Recorder attributes mirrored onto this callback
    attrs = ['lrs', 'iters', 'losses', 'values']
    def before_fit(self):
        "Prepare state for training"
        # Only create empty lists the first time, so values written by a
        # previous `fit` are not discarded here.
        for attr in self.attrs:
            if not hasattr(self.per_recorder, attr): setattr(self.per_recorder, attr, [])

    def after_batch(self):
        # Mirror the Recorder's current history onto this callback.
        # NOTE(review): this stores a *reference* to the Recorder's list,
        # not a copy — if the Recorder re-creates those lists on a later
        # `fit`, the previously "persisted" history is silently replaced.
        # Confirm whether accumulating (extend / copy) was intended.
        for attr in self.attrs:
            setattr(self.per_recorder, attr, getattr(self.recorder, attr))
models = []
dls = imp_dataloader(hai, hai_era, var_sel = ['TA', 'SW_IN', 'VPD'], block_len=200, gap_len=10, bs=20, control_lags=[1], n_rep=2)
len(dls.valid.items)
items = [dls.valid.items[i] for i in [10, 50, 100, 200, 300, 400]]
def train_show_save(learn, n_iter, lr):
    """Fit `learn` for `n_iter` epochs at learning rate `lr`, snapshot the
    weights, plot the loss curve, and show results on fixed validation items.

    Appends a frozen copy of the model weights to the global `models`
    list and returns the `show_results` output for a fixed set of
    validation items from the global `dls`.
    """
    learn.fit(n_iter, lr)
    # state_dict().copy() is only a shallow copy: the saved dict would
    # still reference the live parameter tensors, so every "snapshot"
    # would silently keep changing as training continues. Clone each
    # tensor to freeze the weights at this point.
    models.append({k: v.detach().clone() for k, v in learn.model.state_dict().items()})
    learn.recorder.plot_loss()
    items = [dls.valid.items[i] for i in [10, 50, 100, 200, 300, 400]]
    return show_results(learn, items = items, control=hai_control)

Gaps all variables

model = init_smart(3,3,3, hai, pca=False).cuda()
loss = loss_func=KalmanLoss(only_gap=False)
learn = Learner(dls, model, loss, cbs=[Float64Callback], metrics=imp_metrics)
learn.model.use_smooth = True
show_results(learn, items=items, control=hai_control)
train_show_save(learn, 1, 1e-3)
epoch train_loss valid_loss rmse rmse_gap r2 r2_gap time
0 -87.370314 -96.212915 0.069196 0.210351 0.946861 -250571814513532732431918956544.000000 02:37

trans matrix (A)

state x_0 x_1 x_2 x_3 x_4 x_5
x_0 1.1489 0.0042 0.0120 0.8404 -0.0794 -0.1127
x_1 -0.0954 1.1464 -0.0546 0.1177 0.8560 0.0599
x_2 -0.0225 0.0420 1.1357 -0.0717 -0.1190 0.8475
x_3 0.1201 0.0661 0.1170 1.1840 -0.1150 -0.1073
x_4 0.0770 0.1299 0.0068 -0.1847 1.0987 -0.1500
x_5 0.1084 0.0358 0.1133 -0.1809 -0.0281 1.1533

trans cov (Q)

state x_0 x_1 x_2 x_3 x_4 x_5
x_0 0.0341 0.0493 0.0361 -0.0008 -0.0312 0.0026
x_1 0.0493 0.1125 0.0531 0.0475 0.0047 0.0509
x_2 0.0361 0.0531 0.0399 0.0001 -0.0327 0.0058
x_3 -0.0008 0.0475 0.0001 0.0608 0.0614 0.0585
x_4 -0.0312 0.0047 -0.0327 0.0614 0.0901 0.0554
x_5 0.0026 0.0509 0.0058 0.0585 0.0554 0.0594

trans off

state offset
x_0 0.0008
x_1 -0.0083
x_2 0.0067
x_3 0.0023
x_4 -0.0007
x_5 0.0005

obs matrix (H)

variable x_0 x_1 x_2 x_3 x_4 x_5
y_0 0.8238 -0.1897 -0.1014 -0.0841 0.1914 0.1286
y_1 0.0814 0.8353 0.1048 0.1040 0.0127 0.0689
y_2 -0.0742 -0.1502 0.8296 0.1282 0.1811 -0.0746

obs cov (R)

variable y_0 y_1 y_2
y_0 0.0084 0.0000 0.0000
y_1 0.0000 0.0084 0.0000
y_2 0.0000 0.0000 0.0084

obs off

variable offset
y_0 0.0017
y_1 -0.0046
y_2 0.0026

contr matrix (B)

state c_0 c_1 c_2 c_3 c_4 c_5
x_0 -1.0290 -0.0485 -0.0022 0.9297 0.0792 -0.0247
x_1 -0.0267 -0.8778 -0.0233 0.0113 0.9099 0.0261
x_2 -0.0018 -0.0592 -1.0044 -0.0310 0.0674 0.9204
x_3 -0.0503 -0.0312 -0.0556 -0.0440 -0.0529 -0.0587
x_4 -0.0375 -0.0537 -0.0246 -0.0412 -0.0352 -0.0338
x_5 -0.0653 -0.0179 -0.0575 -0.0655 -0.0307 -0.0449

init state mean

state mean
x_0 -0.0393
x_1 0.0044
x_2 -0.0092
x_3 0.0207
x_4 -0.0094
x_5 -0.0021

init state cov

state x_0 x_1 x_2 x_3 x_4 x_5
x_0 2.5500 -0.1402 -0.0082 0.5233 0.3011 0.0882
x_1 -0.1402 2.4578 0.1502 0.0355 0.5714 -0.0459
x_2 -0.0082 0.1502 2.5045 0.2739 0.0634 0.5195
x_3 0.5233 0.0355 0.2739 2.3814 -0.2201 -0.1842
x_4 0.3011 0.5714 0.0634 -0.2201 2.3880 -0.1162
x_5 0.0882 -0.0459 0.5195 -0.1842 -0.1162 2.3874

train_show_save(learn, 1, 1e-3)
/home/simone/anaconda3/envs/data-science/lib/python3.10/site-packages/fastai/callback/core.py:69: UserWarning: You are shadowing an attribute (__class__) that exists in the learner. Use `self.learn.__class__` to avoid this
  warn(f"You are shadowing an attribute ({name}) that exists in the learner. Use `self.learn.{name}` to avoid this")
epoch train_loss valid_loss rmse rmse_gap r2 r2_gap time
0 -108.023250 -113.113034 0.073622 0.198386 0.956828 -105578612000578019271909572608.000000 02:37

trans matrix (A)

state x_0 x_1 x_2 x_3 x_4 x_5
x_0 1.1440 -0.0057 0.0107 0.8262 -0.0845 -0.1205
x_1 -0.0534 1.2089 -0.0223 0.0948 0.7937 0.0494
x_2 -0.0289 0.0176 1.1417 -0.0702 -0.1154 0.8351
x_3 0.1277 0.0589 0.1074 1.1787 -0.1203 -0.1024
x_4 0.0654 0.1560 0.0043 -0.1781 1.1087 -0.1415
x_5 0.0959 0.0439 0.1197 -0.1725 -0.0340 1.1496

trans cov (Q)

state x_0 x_1 x_2 x_3 x_4 x_5
x_0 0.0207 0.0254 0.0210 -0.0052 -0.0232 -0.0033
x_1 0.0254 0.0548 0.0255 0.0259 0.0034 0.0281
x_2 0.0210 0.0255 0.0222 -0.0056 -0.0241 -0.0023
x_3 -0.0052 0.0259 -0.0056 0.0473 0.0504 0.0465
x_4 -0.0232 0.0034 -0.0241 0.0504 0.0695 0.0473
x_5 -0.0033 0.0281 -0.0023 0.0465 0.0473 0.0477

trans off

state offset
x_0 0.0004
x_1 -0.0055
x_2 0.0048
x_3 0.0027
x_4 -0.0033
x_5 0.0021

obs matrix (H)

variable x_0 x_1 x_2 x_3 x_4 x_5
y_0 0.7902 -0.2032 -0.1217 -0.0986 0.2164 0.1090
y_1 0.0160 0.7417 0.0555 0.1016 0.1041 0.0374
y_2 -0.1103 -0.1594 0.7916 0.1185 0.2096 -0.0949

obs cov (R)

variable y_0 y_1 y_2
y_0 0.0077 0.0000 0.0000
y_1 0.0000 0.0077 0.0000
y_2 0.0000 0.0000 0.0077

obs off

variable offset
y_0 -0.0009
y_1 -0.0053
y_2 -0.0021

contr matrix (B)

state c_0 c_1 c_2 c_3 c_4 c_5
x_0 -1.0277 -0.0595 -0.0019 0.9181 0.0838 -0.0194
x_1 -0.0414 -0.8661 -0.0323 0.0110 0.8741 0.0262
x_2 -0.0029 -0.0647 -1.0045 -0.0319 0.0757 0.8957
x_3 -0.0678 -0.0368 -0.0564 -0.0559 -0.0610 -0.0639
x_4 -0.0374 -0.0690 -0.0209 -0.0485 -0.0406 -0.0354
x_5 -0.0591 -0.0267 -0.0753 -0.0633 -0.0407 -0.0555

init state mean

state mean
x_0 -0.0396
x_1 -0.0009
x_2 -0.0049
x_3 0.0153
x_4 -0.0139
x_5 -0.0094

init state cov

state x_0 x_1 x_2 x_3 x_4 x_5
x_0 2.6364 -0.1456 -0.0020 0.5695 0.3986 0.1628
x_1 -0.1456 2.3292 0.1064 0.2074 0.7591 0.1475
x_2 -0.0020 0.1064 2.4205 0.3489 0.1488 0.6143
x_3 0.5695 0.2074 0.3489 2.2351 -0.4355 -0.3277
x_4 0.3986 0.7591 0.1488 -0.4355 2.1625 -0.3445
x_5 0.1628 0.1475 0.6143 -0.3277 -0.3445 2.2518

# learn.save("17_jan_all_gaps")
Path('models/17_jan_all_gaps.pth')
train_show_save(learn, 2, 1e-3)
epoch train_loss valid_loss rmse rmse_gap r2 r2_gap time
0 -122.620241 -126.621236 0.077944 0.202366 0.948695 -115274990280631988956159803392.000000 02:38
1 -134.455010 -139.768441 0.074152 0.188695 0.959962 -103653315974387814635399020544.000000 02:31

trans matrix (A)

state x_0 x_1 x_2 x_3 x_4 x_5
x_0 1.1347 -0.0147 0.0100 0.8021 -0.1122 -0.1444
x_1 -0.0388 1.2480 -0.0185 0.0773 0.7576 0.0414
x_2 -0.0257 -0.0018 1.1500 -0.0814 -0.1272 0.8203
x_3 0.1332 0.0451 0.0866 1.1612 -0.1103 -0.0917
x_4 0.0559 0.1720 0.0056 -0.1625 1.1052 -0.1248
x_5 0.0721 0.0457 0.1226 -0.1523 -0.0385 1.1357

trans cov (Q)

state x_0 x_1 x_2 x_3 x_4 x_5
x_0 0.0146 0.0183 0.0142 -0.0081 -0.0202 -0.0067
x_1 0.0183 0.0445 0.0187 0.0148 -0.0014 0.0168
x_2 0.0142 0.0187 0.0143 -0.0065 -0.0187 -0.0044
x_3 -0.0081 0.0148 -0.0065 0.0340 0.0393 0.0343
x_4 -0.0202 -0.0014 -0.0187 0.0393 0.0550 0.0378
x_5 -0.0067 0.0168 -0.0044 0.0343 0.0378 0.0361

trans off

state offset
x_0 0.0014
x_1 -0.0059
x_2 0.0039
x_3 0.0016
x_4 -0.0036
x_5 0.0032

obs matrix (H)

variable x_0 x_1 x_2 x_3 x_4 x_5
y_0 0.7310 -0.1642 -0.0983 -0.1030 0.2236 0.0907
y_1 0.0158 0.6777 0.0705 0.1196 0.1642 0.0316
y_2 -0.1124 -0.1249 0.7399 0.1211 0.2193 -0.0846

obs cov (R)

variable y_0 y_1 y_2
y_0 0.0064 0.0000 0.0000
y_1 0.0000 0.0065 0.0000
y_2 0.0000 0.0000 0.0065

obs off

variable offset
y_0 -0.0019
y_1 -0.0046
y_2 -0.0026

contr matrix (B)

state c_0 c_1 c_2 c_3 c_4 c_5
x_0 -1.0316 -0.0708 -0.0019 0.8851 0.0973 -0.0094
x_1 -0.0727 -0.8659 -0.0584 0.0075 0.7964 0.0142
x_2 0.0098 -0.0734 -1.0045 -0.0156 0.0845 0.8489
x_3 -0.0977 -0.0434 -0.0574 -0.0723 -0.0760 -0.0730
x_4 -0.0309 -0.1114 -0.0185 -0.0559 -0.0634 -0.0420
x_5 -0.0503 -0.0424 -0.1056 -0.0644 -0.0571 -0.0729

init state mean

state mean
x_0 -0.0585
x_1 -0.0141
x_2 -0.0070
x_3 0.0239
x_4 -0.0043
x_5 -0.0069

init state cov

state x_0 x_1 x_2 x_3 x_4 x_5
x_0 2.9257 0.1245 0.1987 0.4475 0.2435 0.0893
x_1 0.1245 2.4393 0.3641 0.2107 0.7802 0.1895
x_2 0.1987 0.3641 2.4862 0.2959 0.0089 0.6053
x_3 0.4475 0.2107 0.2959 2.1058 -0.5771 -0.4644
x_4 0.2435 0.7802 0.0089 -0.5771 1.9818 -0.5064
x_5 0.0893 0.1895 0.6053 -0.4644 -0.5064 2.0952

train_show_save(learn, 2, 1e-3)
epoch train_loss valid_loss rmse rmse_gap r2 r2_gap time
0 -149.288297 -152.804403 0.072760 0.179190 0.956827 -61340364986143763995601928192.000000 02:35
1 -160.099637 -165.002097 0.071263 0.181697 0.963587 -48387608947773502947627892736.000000 02:37

# learn.save("17_jan_all_gaps_final")
Path('models/17_jan_all_gaps_final.pth')
p = show_results(learn, control=hai_control, items = [items[i] for i in [0,5,4]])

Gap in only 1 variable

dls2 = imp_dataloader(hai, hai_era, var_sel = ['TA'], block_len=200, gap_len=10, bs=20, control_lags=[1], n_rep=2)
model2 = init_smart(3,3, hai, pca=True).cuda()
loss2 = loss_func=KalmanLoss(only_gap=False)
learn2 = Learner(dls2, model2, loss, cbs=[Float64Callback], metrics=imp_metrics)
train_show_save(learn2, 1, 1e-3)
0.00% [0/1 00:00<?]
epoch train_loss valid_loss rmse rmse_gap r2 r2_gap time

0.00% [0/91 00:00<?]
NotImplementedError: Module [KalmanFilter] is missing the required "forward" function

Disable smoother

model.use_smooth = False
train_show_save(learn, 3, 1e-3)
epoch train_loss valid_loss rmse rmse_gap r2 r2_gap time
0 534.159578 465.194522 0.284379 0.329570 -1.919198 -241825956681560181426503024640.000000 02:24

trans matrix (A)

state x_0 x_1 x_2 x_3 x_4 x_5
x_0 0.6761 0.1157 -0.0095 0.5895 0.0728 0.0850
x_1 0.0309 0.6675 0.1639 0.2296 0.5542 -0.1129
x_2 0.1836 -0.1476 0.6498 0.1684 -0.2142 0.5899
x_3 -0.1246 0.0361 0.0138 0.6844 -0.0830 0.1501
x_4 -0.0167 -0.0399 0.0044 0.0089 0.7537 0.0580
x_5 0.0341 0.0300 -0.0436 -0.0362 0.2878 0.6487

trans cov (Q)

state x_0 x_1 x_2 x_3 x_4 x_5
x_0 0.5106 0.3768 0.4383 -0.1720 -0.2863 -0.1749
x_1 0.3768 0.5687 0.1931 -0.2781 0.0461 0.0798
x_2 0.4383 0.1931 0.4406 -0.0734 -0.3650 -0.2384
x_3 -0.1720 -0.2781 -0.0734 0.5150 0.3608 0.3418
x_4 -0.2863 0.0461 -0.3650 0.3608 0.8281 0.6975
x_5 -0.1749 0.0798 -0.2384 0.3418 0.6975 0.6156

trans off

state offset
x_0 -0.0166
x_1 -0.0196
x_2 -0.0446
x_3 0.0033
x_4 -0.0022
x_5 0.0107

obs matrix (H)

variable x_0 x_1 x_2 x_3 x_4 x_5
y_0 -0.3096 0.5414 -0.0652 0.4906 -0.0643 -0.3834
y_1 0.6288 -0.3366 -0.3681 0.0321 -0.2062 0.3634
y_2 -0.2813 0.0500 0.4738 -0.0601 0.2872 -0.1334

obs cov (R)

variable y_0 y_1 y_2
y_0 0.3678 0.0000 0.0000
y_1 0.0000 0.3707 0.0000
y_2 0.0000 0.0000 0.3693

obs off

variable offset
y_0 -0.0197
y_1 -0.0452
y_2 -0.0451

contr matrix (B)

state c_0 c_1 c_2 c_3 c_4 c_5
x_0 -0.9410 -0.5681 -0.8722 0.9998 1.2958 1.0828
x_1 -0.7019 -0.7354 -0.9277 1.2348 1.1344 0.9620
x_2 -0.9322 -0.7328 -0.8307 1.0310 1.1112 1.1017
x_3 0.0914 0.2592 0.0286 0.0638 0.1644 -0.0007
x_4 0.2175 -0.0398 0.0226 0.2072 -0.1205 -0.0117
x_5 -0.0578 0.1485 0.0908 -0.0613 0.0627 0.0792

init state mean

state mean
x_0 0.0290
x_1 -0.0320
x_2 -0.0494
x_3 0.0296
x_4 0.0387
x_5 0.1010

init state cov

state x_0 x_1 x_2 x_3 x_4 x_5
x_0 3.7171 2.7054 2.8366 2.0078 1.3904 2.3275
x_1 2.7054 4.5189 2.1010 1.4436 1.8804 2.8057
x_2 2.8366 2.1010 3.3027 1.3323 0.9307 2.0280
x_3 2.0078 1.4436 1.3323 3.3867 2.1461 3.0821
x_4 1.3904 1.8804 0.9307 2.1461 2.5441 2.6815
x_5 2.3275 2.8057 2.0280 3.0821 2.6815 3.7653
IndexError: list index out of range
train_show_save(learn, 1, 1e-2)
epoch train_loss valid_loss rmse rmse_gap r2 r2_gap time
0 272.526685 207.314376 0.279615 0.290300 -3.962859 -113462721102480788295669252096.000000 06:35

trans matrix (A)

state x_0 x_1 x_2
x_0 0.5320 0.0984 0.2567
x_1 0.5386 -0.1615 -0.1019
x_2 0.4759 0.4640 0.4202

trans cov (Q)

state x_0 x_1 x_2
x_0 1.4043 0.5126 1.8864
x_1 0.5126 0.1871 0.6885
x_2 1.8864 0.6885 2.5339

trans off

state offset
x_0 -0.0972
x_1 -0.2735
x_2 0.0298

obs matrix (H)

variable x_0 x_1 x_2
y_0 0.1792 0.7014 -0.3164
y_1 -0.5045 0.1311 0.3455
y_2 -0.0962 0.0705 0.1246

obs cov (R)

variable y_0 y_1 y_2
y_0 0.0190 0.0000 0.0000
y_1 0.0000 0.1624 0.0000
y_2 0.0000 0.0000 0.0297

obs off

variable offset
y_0 0.1664
y_1 -0.0433
y_2 0.0232

contr matrix (B)

state c_0 c_1 c_2 c_3 c_4 c_5
x_0 -0.0331 -0.4960 0.2924 0.5946 -0.1127 -0.1933
x_1 0.5991 0.3053 0.2248 0.7649 0.9446 0.0394
x_2 0.0060 0.0013 -0.0667 -0.1143 0.7781 0.6409

init state mean

state mean
x_0 -1.0627
x_1 -0.6392
x_2 -1.2803

init state cov

state x_0 x_1 x_2
x_0 9.0224 3.6789 10.5469
x_1 3.6789 2.3654 5.9687
x_2 10.5469 5.9687 21.9915
NameError: name 'learn1' is not defined
train_show_save(learn, 1, 2e-3)
epoch train_loss valid_loss rmse rmse_gap r2 r2_gap time
0 160.675568 150.361449 0.266633 0.269324 -1.959343 -53151966368643636872563654656.000000 05:30

trans matrix (A)

state x_0 x_1 x_2
x_0 0.5363 0.1070 0.2723
x_1 0.5375 -0.1534 -0.0948
x_2 0.4909 0.4638 0.4301

trans cov (Q)

state x_0 x_1 x_2
x_0 1.3408 0.4970 1.7922
x_1 0.4970 0.1842 0.6643
x_2 1.7922 0.6643 2.3955

trans off

state offset
x_0 -0.0948
x_1 -0.2568
x_2 0.0393

obs matrix (H)

variable x_0 x_1 x_2
y_0 0.1742 0.6954 -0.3174
y_1 -0.5094 0.1551 0.3429
y_2 -0.1147 0.0583 0.1163

obs cov (R)

variable y_0 y_1 y_2
y_0 0.0121 0.0000 0.0000
y_1 0.0000 0.1100 0.0000
y_2 0.0000 0.0000 0.0190

obs off

variable offset
y_0 0.1697
y_1 -0.0123
y_2 0.0387

contr matrix (B)

state c_0 c_1 c_2 c_3 c_4 c_5
x_0 -0.0363 -0.5225 0.3103 0.5948 -0.0692 -0.1628
x_1 0.5955 0.3523 0.1332 0.7587 0.9820 -0.0509
x_2 -0.0159 0.0573 -0.0471 -0.1409 0.7802 0.6457

init state mean

state mean
x_0 -1.0500
x_1 -0.6014
x_2 -1.2993

init state cov

state x_0 x_1 x_2
x_0 10.9221 3.8061 12.4742
x_1 3.8061 1.7574 5.6427
x_2 12.4742 5.6427 24.9669
train_show_save(learn, 1, 2e-3)
epoch train_loss valid_loss rmse rmse_gap r2 r2_gap time
0 115.738528 104.632719 0.257947 0.259673 -1.679687 -24519279833493970234084687872.000000 05:09

trans matrix (A)

state x_0 x_1 x_2
x_0 0.5303 0.1124 0.2795
x_1 0.5333 -0.1470 -0.0908
x_2 0.4963 0.4602 0.4322

trans cov (Q)

state x_0 x_1 x_2
x_0 1.2399 0.4645 1.6526
x_1 0.4645 0.1740 0.6191
x_2 1.6526 0.6191 2.2026

trans off

state offset
x_0 -0.0883
x_1 -0.2505
x_2 0.0440

obs matrix (H)

variable x_0 x_1 x_2
y_0 0.1715 0.6898 -0.3162
y_1 -0.5142 0.1782 0.3403
y_2 -0.1173 0.0543 0.1176

obs cov (R)

variable y_0 y_1 y_2
y_0 0.0079 0.0000 0.0000
y_1 0.0000 0.0732 0.0000
y_2 0.0000 0.0000 0.0124

obs off

variable offset
y_0 0.1699
y_1 -0.0058
y_2 0.0383

contr matrix (B)

state c_0 c_1 c_2 c_3 c_4 c_5
x_0 -0.0449 -0.5621 0.3391 0.5937 0.0285 -0.1154
x_1 0.5860 0.3917 0.0584 0.7468 1.0129 -0.1247
x_2 -0.0479 0.1664 -0.0280 -0.1814 0.7807 0.6422

init state mean

state mean
x_0 -1.0488
x_1 -0.5958
x_2 -1.3038

init state cov

state x_0 x_1 x_2
x_0 12.1503 4.1501 13.5248
x_1 4.1501 1.7544 5.9730
x_2 13.5248 5.9730 27.6423
learn.model.use_smooth = True
show_results(learn, items = [learn.dls.valid.items[i] for i in [10, 100, 300, 500, 700, 1000]])
train_show_save(learn, 1, 1e-3)
epoch train_loss valid_loss rmse rmse_gap r2 r2_gap time
0 30.829145 26.059960 0.201186 0.215770 -0.034140 -48287381455002891037683220480.000000 06:32

trans matrix (A)

state x_0 x_1 x_2
x_0 0.5309 0.1159 0.2797
x_1 0.5313 -0.1385 -0.0881
x_2 0.4961 0.4560 0.4324

trans cov (Q)

state x_0 x_1 x_2
x_0 1.1473 0.4769 1.5832
x_1 0.4769 0.2008 0.6682
x_2 1.5832 0.6682 2.2249

trans off

state offset
x_0 -0.0894
x_1 -0.2426
x_2 0.0426

obs matrix (H)

variable x_0 x_1 x_2
y_0 0.1711 0.6883 -0.3146
y_1 -0.5151 0.1887 0.3414
y_2 -0.1155 0.0454 0.1145

obs cov (R)

variable y_0 y_1 y_2
y_0 0.0063 0.0000 0.0000
y_1 0.0000 0.0583 0.0000
y_2 0.0000 0.0000 0.0097

obs off

variable offset
y_0 0.1747
y_1 0.0044
y_2 0.0287

contr matrix (B)

state c_0 c_1 c_2 c_3 c_4 c_5
x_0 -0.0475 -0.5920 0.3650 0.5948 0.0563 -0.0801
x_1 0.5812 0.4322 -0.0088 0.7377 1.0076 -0.1962
x_2 -0.0461 0.1875 -0.0228 -0.1820 0.7675 0.6408

init state mean

state mean
x_0 -1.0477
x_1 -0.5959
x_2 -1.2990

init state cov

state x_0 x_1 x_2
x_0 12.4282 4.5316 13.6935
x_1 4.5316 2.1406 6.4461
x_2 13.6935 6.4461 27.6478
train_show_save(learn, 1, 1e-3)
epoch train_loss valid_loss rmse rmse_gap r2 r2_gap time
0 2.643850 -2.886783 0.193556 0.212050 0.165402 -29540765662186335322106757120.000000 06:27

trans matrix (A)

state x_0 x_1 x_2
x_0 0.5339 0.1210 0.2783
x_1 0.5270 -0.1285 -0.0862
x_2 0.4976 0.4502 0.4350

trans cov (Q)

state x_0 x_1 x_2
x_0 1.0913 0.4682 1.5174
x_1 0.4682 0.2037 0.6628
x_2 1.5174 0.6628 2.1595

trans off

state offset
x_0 -0.0873
x_1 -0.2397
x_2 0.0401

obs matrix (H)

variable x_0 x_1 x_2
y_0 0.1690 0.6841 -0.3145
y_1 -0.5175 0.1944 0.3405
y_2 -0.1099 0.0372 0.1142

obs cov (R)

variable y_0 y_1 y_2
y_0 0.0051 0.0000 0.0000
y_1 0.0000 0.0467 0.0000
y_2 0.0000 0.0000 0.0078

obs off

variable offset
y_0 0.1766
y_1 0.0070
y_2 0.0210

contr matrix (B)

state c_0 c_1 c_2 c_3 c_4 c_5
x_0 -0.0614 -0.6233 0.4026 0.5869 0.0964 -0.0301
x_1 0.5842 0.4624 -0.0654 0.7358 0.9837 -0.2582
x_2 -0.0390 0.2132 -0.0130 -0.1775 0.7580 0.6438

init state mean

state mean
x_0 -1.0447
x_1 -0.5957
x_2 -1.2961

init state cov

state x_0 x_1 x_2
x_0 13.1380 4.8428 14.4531
x_1 4.8428 2.3677 6.8288
x_2 14.4531 6.8288 28.0978

Loss only gap

learn.model.use_smooth = True
learn.loss_func = KalmanLoss(only_gap=True)
train_show_save(learn, 1,  5e-3)
epoch train_loss valid_loss rmse rmse_gap r2 r2_gap time
0 -3.892116 -2.872949 0.120294 0.204105 -2.923830 -85436991145187843135533744128.000000 04:26

trans matrix (A)

state x_0 x_1 x_2
x_0 0.2027 0.3761 0.2969
x_1 0.3594 0.3686 0.3606
x_2 0.4679 0.2295 0.2852

trans cov (Q)

state x_0 x_1 x_2
x_0 0.0220 0.0113 -0.0224
x_1 0.0113 0.0064 -0.0097
x_2 -0.0224 -0.0097 0.0290

trans off

state offset
x_0 0.3025
x_1 -0.5231
x_2 0.2064

obs matrix (H)

variable x_0 x_1 x_2
y_0 -0.1489 0.3517 -0.0053
y_1 0.2922 0.1890 -0.4105
y_2 0.3920 -0.0638 0.2473

obs cov (R)

variable y_0 y_1 y_2
y_0 0.0001 0.0000 0.0000
y_1 0.0000 0.0054 0.0000
y_2 0.0000 0.0000 0.0003

obs off

variable offset
y_0 0.2038
y_1 0.1323
y_2 -0.1988

contr matrix (B)

state c_0 c_1 c_2 c_3 c_4 c_5
x_0 -0.9856 0.6178 0.4694 -0.5368 0.5367 -0.0159
x_1 0.6667 0.5327 -0.3882 0.6907 -0.0209 -0.2365
x_2 0.0330 -0.7175 -0.1714 0.2048 -0.6491 0.1349

init state mean

state mean
x_0 0.0922
x_1 -0.6262
x_2 0.2368

init state cov

state x_0 x_1 x_2
x_0 8.1062 12.7070 6.0908
x_1 12.7070 21.6312 10.2860
x_2 6.0908 10.2860 5.5679

Smoother

learn1.model.use_smooth = True
train_show_save(learn1, 1,  5e-3)
epoch train_loss valid_loss rmse rmse_gap r2 r2_gap time
0 -415.543654 -449.643886 0.055635 0.200575 0.759219 -45104963579554009935054372864.000000 07:43

trans matrix (A)

state x_0 x_1 x_2
x_0 0.1815 0.3765 0.3138
x_1 0.3401 0.3500 0.3764
x_2 0.4904 0.2297 0.2438

trans cov (Q)

state x_0 x_1 x_2
x_0 0.0491 0.0170 -0.0614
x_1 0.0170 0.0127 -0.0243
x_2 -0.0614 -0.0243 0.0889

trans off

state offset
x_0 0.3080
x_1 -0.5210
x_2 0.2044

obs matrix (H)

variable x_0 x_1 x_2
y_0 -0.1630 0.3552 -0.0202
y_1 0.3143 0.1963 -0.4623
y_2 0.4022 -0.0653 0.2466

obs cov (R)

variable y_0 y_1 y_2
y_0 0.0001 0.0000 0.0000
y_1 0.0000 0.0054 0.0000
y_2 0.0000 0.0000 0.0003

obs off

variable offset
y_0 0.2096
y_1 0.1459
y_2 -0.2079

contr matrix (B)

state c_0 c_1 c_2 c_3 c_4 c_5
x_0 -1.0006 0.5988 0.4673 -0.5447 0.5317 -0.0122
x_1 0.6825 0.5156 -0.3620 0.7188 -0.0186 -0.1805
x_2 0.0303 -0.7130 -0.1672 0.2100 -0.6278 0.1642

init state mean

state mean
x_0 0.0933
x_1 -0.6332
x_2 0.2392

init state cov

state x_0 x_1 x_2
x_0 8.2929 13.0004 6.2311
x_1 13.0004 22.1310 10.5205
x_2 6.2311 10.5205 5.6921
# learn1.save("model_16_jan1")
Path('models/model_16_jan1.pth')